The following code is taken from the Keras blog post "Building Autoencoders in Keras" (blog.keras.io).


In [1]:
from keras.layers import Input, Dense
from keras.models import Model

# Size of the learned code: 784 input floats -> 32, a ~24.5x compression.
encoding_dim = 32

# Symbolic input for one flattened 28x28 MNIST image.
input_img = Input(shape=(784,))
# "encoded" is the compressed representation of the input.
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction, squashed to [0, 1] like the pixels.
decoded = Dense(784, activation='sigmoid')(encoded)

# This model maps an input to its reconstruction.
# NOTE: `inputs`/`outputs` is the Keras 2 API; the `input`/`output`
# keywords used in Keras 1 were removed in Keras 2.
autoencoder = Model(inputs=input_img, outputs=decoded)


Using Theano backend.

In [2]:
# this model maps an input to its encoded representation
# This model maps an input to its 32-dimensional encoded representation.
# Keras 2 API: `inputs`/`outputs` (the Keras 1 `input`/`output` kwargs were removed).
encoder = Model(inputs=input_img, outputs=encoded)

In [3]:
# create a placeholder for an encoded (32-dimensional) input
# Placeholder for an already-encoded (32-dimensional) input.
encoded_input = Input(shape=(encoding_dim,))
# Reuse the trained last layer of the autoencoder (shared weights, not a copy).
decoder_layer = autoencoder.layers[-1]
# Standalone decoder: code in, reconstructed 784-pixel image out.
# Keras 2 API: `inputs`/`outputs` (the Keras 1 `input`/`output` kwargs were removed).
decoder = Model(inputs=encoded_input, outputs=decoder_layer(encoded_input))

In [4]:
# Train against a per-pixel binary crossentropy between the input image and
# its sigmoid reconstruction; only the end-to-end autoencoder is compiled
# (encoder/decoder share its layers and need no separate training).
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')

In [5]:
from keras.datasets import mnist
import numpy as np
# Labels are discarded (`_`): an autoencoder is unsupervised and only
# needs the images themselves.
(x_train, _), (x_test, _) = mnist.load_data()


Downloading data from https://s3.amazonaws.com/img-datasets/mnist.pkl.gz
15294464/15296311 [============================>.] - ETA: 0s

In [6]:
# Scale pixel intensities from [0, 255] to [0, 1] so they match the range
# of the sigmoid output layer (required for binary crossentropy).
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
# Flatten each 28x28 image into a single 784-element vector.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
# print() call syntax works under both Python 2 and Python 3;
# the original `print x` statement form is a SyntaxError on Python 3.
print(x_train.shape)
print(x_test.shape)


(60000, 784)
(10000, 784)

In [8]:
# Train the autoencoder to reproduce its own input (targets == inputs).
# Validation on the test images monitors reconstruction of unseen digits.
# NOTE: `epochs` is the Keras 2 keyword; `nb_epoch` (Keras 1) was removed.
autoencoder.fit(x_train, x_train,
                verbose=1,
                epochs=50,
                batch_size=256,
                shuffle=True,
                validation_data=(x_test, x_test))


Train on 60000 samples, validate on 10000 samples
Epoch 1/50
60000/60000 [==============================] - 11s - loss: 0.3762 - val_loss: 0.2727
Epoch 2/50
60000/60000 [==============================] - 12s - loss: 0.2656 - val_loss: 0.2558
Epoch 3/50
60000/60000 [==============================] - 12s - loss: 0.2457 - val_loss: 0.2336
Epoch 4/50
60000/60000 [==============================] - 11s - loss: 0.2254 - val_loss: 0.2155
Epoch 5/50
60000/60000 [==============================] - 12s - loss: 0.2099 - val_loss: 0.2019
Epoch 6/50
60000/60000 [==============================] - 13s - loss: 0.1977 - val_loss: 0.1911
Epoch 7/50
60000/60000 [==============================] - 13s - loss: 0.1883 - val_loss: 0.1831
Epoch 8/50
60000/60000 [==============================] - 13s - loss: 0.1812 - val_loss: 0.1768
Epoch 9/50
60000/60000 [==============================] - 11s - loss: 0.1754 - val_loss: 0.1715
Epoch 10/50
60000/60000 [==============================] - 10s - loss: 0.1705 - val_loss: 0.1670
Epoch 11/50
60000/60000 [==============================] - 12s - loss: 0.1660 - val_loss: 0.1626
Epoch 12/50
60000/60000 [==============================] - 14s - loss: 0.1618 - val_loss: 0.1586
Epoch 13/50
60000/60000 [==============================] - 14s - loss: 0.1580 - val_loss: 0.1549
Epoch 14/50
60000/60000 [==============================] - 14s - loss: 0.1543 - val_loss: 0.1512
Epoch 15/50
60000/60000 [==============================] - 14s - loss: 0.1508 - val_loss: 0.1478
Epoch 16/50
60000/60000 [==============================] - 13s - loss: 0.1476 - val_loss: 0.1448
Epoch 17/50
60000/60000 [==============================] - 14s - loss: 0.1446 - val_loss: 0.1418
Epoch 18/50
60000/60000 [==============================] - 13s - loss: 0.1418 - val_loss: 0.1390
Epoch 19/50
60000/60000 [==============================] - 13s - loss: 0.1393 - val_loss: 0.1365
Epoch 20/50
60000/60000 [==============================] - 12s - loss: 0.1369 - val_loss: 0.1344
Epoch 21/50
60000/60000 [==============================] - 14s - loss: 0.1347 - val_loss: 0.1321
Epoch 22/50
60000/60000 [==============================] - 14s - loss: 0.1326 - val_loss: 0.1301
Epoch 23/50
60000/60000 [==============================] - 14s - loss: 0.1306 - val_loss: 0.1281
Epoch 24/50
60000/60000 [==============================] - 14s - loss: 0.1287 - val_loss: 0.1262
Epoch 25/50
60000/60000 [==============================] - 14s - loss: 0.1269 - val_loss: 0.1245
Epoch 26/50
60000/60000 [==============================] - 14s - loss: 0.1252 - val_loss: 0.1228
Epoch 27/50
60000/60000 [==============================] - 15s - loss: 0.1236 - val_loss: 0.1212
Epoch 28/50
60000/60000 [==============================] - 13s - loss: 0.1220 - val_loss: 0.1197
Epoch 29/50
60000/60000 [==============================] - 12s - loss: 0.1205 - val_loss: 0.1182
Epoch 30/50
60000/60000 [==============================] - 12s - loss: 0.1191 - val_loss: 0.1168
Epoch 31/50
60000/60000 [==============================] - 12s - loss: 0.1178 - val_loss: 0.1155
Epoch 32/50
60000/60000 [==============================] - 12s - loss: 0.1165 - val_loss: 0.1143
Epoch 33/50
60000/60000 [==============================] - 13s - loss: 0.1153 - val_loss: 0.1131
Epoch 34/50
60000/60000 [==============================] - 13s - loss: 0.1142 - val_loss: 0.1120
Epoch 35/50
60000/60000 [==============================] - 12s - loss: 0.1131 - val_loss: 0.1111
Epoch 36/50
60000/60000 [==============================] - 13s - loss: 0.1121 - val_loss: 0.1100
Epoch 37/50
60000/60000 [==============================] - 14s - loss: 0.1112 - val_loss: 0.1091
Epoch 38/50
60000/60000 [==============================] - 12s - loss: 0.1103 - val_loss: 0.1083
Epoch 39/50
60000/60000 [==============================] - 14s - loss: 0.1095 - val_loss: 0.1075
Epoch 40/50
60000/60000 [==============================] - 13s - loss: 0.1087 - val_loss: 0.1068
Epoch 41/50
60000/60000 [==============================] - 12s - loss: 0.1080 - val_loss: 0.1061
Epoch 42/50
60000/60000 [==============================] - 14s - loss: 0.1073 - val_loss: 0.1054
Epoch 43/50
60000/60000 [==============================] - 14s - loss: 0.1067 - val_loss: 0.1048
Epoch 44/50
60000/60000 [==============================] - 14s - loss: 0.1061 - val_loss: 0.1042
Epoch 45/50
60000/60000 [==============================] - 15s - loss: 0.1055 - val_loss: 0.1037
Epoch 46/50
60000/60000 [==============================] - 15s - loss: 0.1050 - val_loss: 0.1031
Epoch 47/50
60000/60000 [==============================] - 15s - loss: 0.1045 - val_loss: 0.1027
Epoch 48/50
60000/60000 [==============================] - 15s - loss: 0.1040 - val_loss: 0.1022
Epoch 49/50
60000/60000 [==============================] - 15s - loss: 0.1036 - val_loss: 0.1018
Epoch 50/50
60000/60000 [==============================] - 15s - loss: 0.1032 - val_loss: 0.1013
Out[8]:
<keras.callbacks.History at 0x7f0bd56606d0>

In [9]:
# encode and decode some digits
# note that we take them from the *test* set
# Encode and decode some digits.
# Note that we take them from the *test* set, so these reconstructions
# are of images the model never saw during training.
encoded_imgs = encoder.predict(x_test)      # one 32-dim code per test image
decoded_imgs = decoder.predict(encoded_imgs)  # 784-dim reconstructions


INFO (theano.gof.compilelock): Refreshing lock /home/agarwalnaimish/.theano/compiledir_Linux-4.4--generic-x86_64-with-Ubuntu-16.04-xenial-x86_64-2.7.12-64/lock_dir/lock

In [10]:
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt

n = 10  # how many digits we will display

# Use the explicit figure/axes interface instead of the pyplot state machine,
# and pass cmap='gray' per image rather than calling plt.gray() in the loop.
# Top row: original test digits; bottom row: their reconstructions.
fig, axes = plt.subplots(2, n, figsize=(20, 4))
for i in range(n):
    # display original
    ax = axes[0, i]
    ax.imshow(x_test[i].reshape(28, 28), cmap='gray')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = axes[1, i]
    ax.imshow(decoded_imgs[i].reshape(28, 28), cmap='gray')
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()



In [ ]: